int enable_intremap(struct iommu *iommu)
{
struct ir_ctrl *ir_ctrl;
- s_time_t start_time;
+ u32 sts;
ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);
iommu->gcmd |= DMA_GCMD_SIRTP;
dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
- /* Make sure hardware complete it */
- start_time = NOW();
- while ( !(dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_SIRTPS) )
- {
- if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
- panic("Cannot set SIRTP field for interrupt remapping\n");
- cpu_relax();
- }
-
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
+ (sts & DMA_GSTS_SIRTPS), sts);
+
/* enable compatibility format interrupt pass-through */
iommu->gcmd |= DMA_GCMD_CFI;
dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
- start_time = NOW();
- while ( !(dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_CFIS) )
- {
- if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
- panic("Cannot set CFI field for interrupt remapping\n");
- cpu_relax();
- }
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
+ (sts & DMA_GSTS_CFIS), sts);
/* enable interrupt remapping hardware */
iommu->gcmd |= DMA_GCMD_IRE;
dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
- start_time = NOW();
- while ( !(dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_IRES) )
- {
- if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
- panic("Cannot set IRE field for interrupt remapping\n");
- cpu_relax();
- }
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
+ (sts & DMA_GSTS_IRES), sts);
/* After setting SIRTP, we must globally invalidate the IEC */
iommu_flush_iec_global(iommu);
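
IOMMU_WAIT_OP is the helper all of these hunks converge on; its definition is not part of this excerpt. A minimal sketch, reconstructed from the loops it replaces (the exact panic message and formatting are assumptions), would be:

#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)                     \
do {                                                                    \
    s_time_t start = NOW();                                             \
    while ( 1 )                                                         \
    {                                                                   \
        /* Re-read the register until the caller's condition holds. */  \
        sts = op(iommu->reg, offset);                                   \
        if ( cond )                                                     \
            break;                                                      \
        /* Preserve the old loops' behaviour: panic on timeout. */      \
        if ( NOW() > start + DMAR_OPERATION_TIMEOUT )                   \
            panic("%s:%d: DMAR hardware malfunction\n",                 \
                  __FILE__, __LINE__);                                  \
        cpu_relax();                                                    \
    }                                                                   \
} while ( 0 )

Because the read op, the condition, and the status variable are all macro parameters, the same helper serves 32-bit polls (dmar_readl into a u32) and 64-bit polls (dmar_readq into a u64), as the CCMD and IOTLB hunks below show.
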
void disable_intremap(struct iommu *iommu)
{
- s_time_t start_time;
+ u32 sts;
ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);
iommu->gcmd &= ~(DMA_GCMD_SIRTP | DMA_GCMD_CFI | DMA_GCMD_IRE);
dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
- start_time = NOW();
- while ( dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_IRES )
- {
- if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
- panic("Cannot clear IRE field for interrupt remapping\n");
- cpu_relax();
- }
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
+ !(sts & DMA_GSTS_IRES), sts);
}
{
u32 val;
unsigned long flag;
- s_time_t start_time;
if ( !rwbf_quirk && !cap_rwbf(iommu->cap) )
return;
dmar_writel(iommu->reg, DMAR_GCMD_REG, val);
/* Make sure hardware completes it */
- start_time = NOW();
- for ( ; ; )
- {
- val = dmar_readl(iommu->reg, DMAR_GSTS_REG);
- if ( !(val & DMA_GSTS_WBFS) )
- break;
- if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT )
- panic("%s: DMAR hardware is malfunctional,"
- " please disable IOMMU\n", __func__);
- cpu_relax();
- }
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
+ !(val & DMA_GSTS_WBFS), val);
+
spin_unlock_irqrestore(&iommu->register_lock, flag);
}
struct iommu *iommu = (struct iommu *) _iommu;
u64 val = 0;
unsigned long flag;
- s_time_t start_time;
/*
* In the non-present entry flush case, if hardware doesn't cache
dmar_writeq(iommu->reg, DMAR_CCMD_REG, val);
/* Make sure hardware completes it */
- start_time = NOW();
- for ( ; ; )
- {
- val = dmar_readq(iommu->reg, DMAR_CCMD_REG);
- if ( !(val & DMA_CCMD_ICC) )
- break;
- if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT )
- panic("%s: DMAR hardware is malfunctional,"
- " please disable IOMMU\n", __func__);
- cpu_relax();
- }
+ IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, dmar_readq,
+ !(val & DMA_CCMD_ICC), val);
+
spin_unlock_irqrestore(&iommu->register_lock, flag);
/* flush context entry will implicitly flush write buffer */
return 0;
int tlb_offset = ecap_iotlb_offset(iommu->ecap);
u64 val = 0, val_iva = 0;
unsigned long flag;
- s_time_t start_time;
/*
* In the non-present entry flush case, if hardware doesn't cache
dmar_writeq(iommu->reg, tlb_offset + 8, val);
/* Make sure hardware completes it */
- start_time = NOW();
- for ( ; ; )
- {
- val = dmar_readq(iommu->reg, tlb_offset + 8);
- if ( !(val & DMA_TLB_IVT) )
- break;
- if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT )
- panic("%s: DMAR hardware is malfunctional,"
- " please disable IOMMU\n", __func__);
- cpu_relax();
- }
+ IOMMU_WAIT_OP(iommu, (tlb_offset + 8), dmar_readq,
+ !(val & DMA_TLB_IVT), val);
spin_unlock_irqrestore(&iommu->register_lock, flag);
/* check IOTLB invalidation granularity */
{
u32 cmd, sts;
unsigned long flags;
- s_time_t start_time;
spin_lock(&iommu->lock);
dmar_writel(iommu->reg, DMAR_GCMD_REG, cmd);
/* Make sure hardware completes it */
- start_time = NOW();
- for ( ; ; )
- {
- sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
- if ( sts & DMA_GSTS_RTPS )
- break;
- if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT )
- panic("%s: DMAR hardware is malfunctional,"
- " please disable IOMMU\n", __func__);
- cpu_relax();
- }
-
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
+ (sts & DMA_GSTS_RTPS), sts);
spin_unlock_irqrestore(&iommu->register_lock, flags);
return 0;
{
u32 sts;
unsigned long flags;
- s_time_t start_time;
dprintk(XENLOG_INFO VTDPREFIX,
"iommu_enable_translation: iommu->reg = %p\n", iommu->reg);
spin_lock_irqsave(&iommu->register_lock, flags);
iommu->gcmd |= DMA_GCMD_TE;
dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
+
/* Make sure hardware completes it */
- start_time = NOW();
- for ( ; ; )
- {
- sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
- if ( sts & DMA_GSTS_TES )
- break;
- if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT )
- panic("%s: DMAR hardware is malfunctional,"
- " please disable IOMMU\n", __func__);
- cpu_relax();
- }
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
+ (sts & DMA_GSTS_TES), sts);
/* Disable PMRs when the VT-d engine takes effect, as the spec requires */
disable_pmr(iommu);
{
u32 sts;
unsigned long flags;
- s_time_t start_time;
spin_lock_irqsave(&iommu->register_lock, flags);
iommu->gcmd &= ~DMA_GCMD_TE;
dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
/* Make sure hardware completes it */
- start_time = NOW();
- for ( ; ; )
- {
- sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
- if ( !(sts & DMA_GSTS_TES) )
- break;
- if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT )
- panic("%s: DMAR hardware is malfunctional,"
- " please disable IOMMU\n", __func__);
- cpu_relax();
- }
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
+ !(sts & DMA_GSTS_TES), sts);
spin_unlock_irqrestore(&iommu->register_lock, flags);
}
int enable_qinval(struct iommu *iommu)
{
- s_time_t start_time;
struct qi_ctrl *qi_ctrl;
struct iommu_flush *flush;
+ u32 sts;
qi_ctrl = iommu_qi_ctrl(iommu);
flush = iommu_get_flush(iommu);
dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
/* Make sure hardware completes it */
- start_time = NOW();
- while ( !(dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_QIES) )
- {
- if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
- panic("Cannot set QIE field for queue invalidation\n");
- cpu_relax();
- }
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
+ (sts & DMA_GSTS_QIES), sts);
qinval_enabled = 1;
return 0;
void disable_qinval(struct iommu *iommu)
{
- s_time_t start_time;
+ u32 sts;
ASSERT(ecap_queued_inval(iommu->ecap) && iommu_qinval);
dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
/* Make sure hardware completes it */
- start_time = NOW();
- while ( dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_QIES )
- {
- if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
- panic("Cannot clear QIE field for queue invalidation\n");
- cpu_relax();
- }
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
+ !(sts & DMA_GSTS_QIES), sts);
}
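
The net effect of the patch: a dozen hand-rolled spin-wait loops collapse into one macro, so the timeout value and the failure policy (panic) live in a single place instead of being copy-pasted, slightly divergently, at every call site. Adding a new wait then becomes a two-liner; for example (hypothetical, using the fault-status register purely as an illustration):

    u32 fsts;
    IOMMU_WAIT_OP(iommu, DMAR_FSTS_REG, dmar_readl,
                  !(fsts & DMA_FSTS_PPF), fsts);
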